bitkeeper revision 1.1108.54.1 (410f6f7a8sttwKzJ5rdNrGdqP7qDCw)
author cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Tue, 3 Aug 2004 10:56:58 +0000 (10:56 +0000)
committer cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Tue, 3 Aug 2004 10:56:58 +0000 (10:56 +0000)
Make dma_alloc_coherent request contiguous memory from Xen.

linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c

index ab83af48bab3e90caf0bc7458b0e14d4e429c99e..6b604383e8a5591146d9749b8ca89942d6b36148 100644 (file)
@@ -17,18 +17,64 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, int gfp)
 {
        void *ret;
+       unsigned int order = get_order(size);
+       unsigned long vstart;
+
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;
 
-       ret = (void *)__get_free_pages(gfp, get_order(size));
+       /* Allocate 2^order pages; keep the VA in vstart for the PTE walks below. */
+       ret = (void *)(vstart = __get_free_pages(gfp, order));
+       if (ret == NULL)
+               return ret;
 
-       if (ret != NULL) {
-               memset(ret, 0, size);
-               *dma_handle = virt_to_bus(ret);
+       /*
+        * Ensure multi-page extents are contiguous in machine memory.
+        * This code could be cleaned up some, and the number of
+        * hypercalls reduced.
+        */
+       if (size > PAGE_SIZE) {
+               pgd_t         *pgd;
+               pmd_t         *pmd;
+               pte_t         *pte;
+               unsigned long  pfn, i;
+               /* 1. Zap current PTEs, giving away the underlying pages. */
+               for (i = 0; i < (1<<order); i++) {
+                       pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+                       pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                       pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                       /* PTEs hold machine addresses under Xen, so this is an MFN. */
+                       pfn = pte->pte_low >> PAGE_SHIFT;
+                       queue_l1_entry_update(pte, 0);
+                       flush_page_update_queue();
+                       if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation, 
+                                                 &pfn, 1, 0) != 1) BUG();
+               }
+               /* 2. Get a new contiguous memory extent. */
+               if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
+                                         &pfn, 1, order) != 1) BUG();
+               /* 3. Map the new extent in place of old pages. */
+               for (i = 0; i < (1<<order); i++) {
+                       pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+                       pmd = pmd_offset(pgd, (vstart + (i*PAGE_SIZE)));
+                       pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+                       queue_l1_entry_update(
+                               pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
+                       queue_machphys_update(
+                               pfn+i, (__pa(ret)>>PAGE_SHIFT)+i);
+                       phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
+                               pfn+i;
+                        flush_page_update_queue();
+               }
+               flush_page_update_queue();
        }
+
+       memset(ret, 0, size);
+       *dma_handle = virt_to_bus(ret);
+
        return ret;
 }